# Importing dependencies:
import cv2
import glob
import pickle
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
from IPython.display import HTML
%matplotlib inline
print('Imported all dependencies!')
# Opening and reading an image (one provided by Udacity):
# NOTE(review): cv2.imread returns BGR; plt.imshow below interprets the array
# as RGB, so on-screen colors are channel-swapped — harmless for calibration.
image = cv2.imread('camera_cal/calibration2.jpg')
# Importing several images for testing:
images = glob.glob('camera_cal/calibration*.jpg')
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image)
# Setting chessboard dimensions:
# Number of inside corners in X:
nx = 9
# Number of inside corners in Y:
ny = 6
# Creating object points:
# List that will hold 3D points of the real world (one entry per image):
objpoints = []
# List that will hold the matching 2D points in the image plane:
imagepoints = []
print('Finished setting the arrays!')
# Creating a function that finds the chessboard corners in the calibration images:
def find_points():
    """Detect chessboard corners in every calibration image and calibrate.

    Appends one set of 3D object points and 2D image points per successful
    detection to the module-level ``objpoints`` / ``imagepoints`` lists,
    then runs cv2.calibrateCamera over all of them.

    Returns:
        (mtx, dist): camera matrix and distortion coefficients.
    """
    # Template of 3D object points for an (nx, ny) chessboard; z stays 0
    # because the board is planar. Fix: the original hard-coded 9 and 6
    # instead of using the nx/ny globals, and rebuilt this identical array
    # on every loop iteration — hoisted and parameterized.
    objp = np.zeros((nx * ny, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    gray = None
    for fname in images:
        img = cv2.imread(fname)
        # findChessboardCorners expects a single-channel image:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Looking for chess board corners:
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret:
            # Recording the matched 3D/2D point sets:
            objpoints.append(objp)
            imagepoints.append(corners)
            # Drawing corners on the chessboard (visual sanity check only;
            # `img` is a local copy and is not kept):
            cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
    # NOTE(review): `gray` is the last image processed — all calibration
    # images are assumed to share the same size. TODO confirm.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imagepoints, gray.shape[::-1], None, None)
    return mtx, dist
print('Function is ready to use!')
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Showing the original image before passing it through the function:
ax1.imshow(image)
ax1.set_title('Original Image:', fontsize=40)
# Running calibration; mtx/dist become globals used by undistort_img below:
mtx, dist = find_points()
# NOTE(review): this is the unmodified original image — drawChessboardCorners
# ran on local copies inside find_points(), so no corners are actually
# drawn on the image saved and shown here.
image_points = image
plt.imsave('output_images/image_points.jpg', image_points)
print('Image saved in directory!')
# Showing the found-points image after passing it through the function:
ax2.imshow(image_points)
ax2.set_title('Points Found on Image:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Calibrating and undistorting the images:
def undistort_img(img):
    """Undistort an image using the global calibration results.

    Args:
        img: image array of any size (BGR or RGB — channels pass through).

    Returns:
        The undistorted image, same shape as the input.
    """
    # Passing `mtx` again as the new camera matrix keeps the original
    # field of view. (Removed: an unused `img_size` local and a
    # commented-out pickle dump of the calibration results.)
    return cv2.undistort(img, mtx, dist, None, mtx)
print('Function ready to use!')
# Showing the original image before passing it through the function:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(image)
ax1.set_title('Original Image:', fontsize=30)
# Saving the undistorted image in a variable and folder:
undistort_image = undistort_img(image)
plt.imsave('output_images/undistort_image.jpg', undistort_image)
print('Image saved in directory!')
# Showing the undistorted image after passing it through the function:
ax2.imshow(undistort_image)
ax2.set_title('Undistorted Image:', fontsize=30)
# Creating a function that applies a thresholded Sobel gradient:
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh_min=0, thresh_max=255):
    """Binary mask of pixels whose Sobel gradient falls inside a threshold.

    Args:
        img: RGB image array.
        orient: 'x' or 'y' — axis of the derivative.
        sobel_kernel: odd kernel size passed to cv2.Sobel.
        thresh_min, thresh_max: inclusive bounds on the 0-255 scaled gradient.

    Returns:
        uint8 binary image, 1 where the scaled gradient is inside the range.
    """
    # Converting image to grayscale:
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Fix: honor `orient` and `sobel_kernel` — the original computed both
    # derivatives with the default 3x3 kernel and then always used X,
    # silently ignoring both parameters.
    if orient == 'y':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    else:
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    # Absolute value of the derivative:
    abs_sobel = np.absolute(sobel)
    # Scaling the gradient to 8-bit (0-255):
    scaled = np.uint8(255 * abs_sobel / np.max(abs_sobel))
    # Mask of 1s where the scaled gradient lies inside [thresh_min, thresh_max]:
    binary_output = np.zeros_like(scaled)
    binary_output[(scaled >= thresh_min) & (scaled <= thresh_max)] = 1
    return binary_output
print('Function ready to use!')
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Showing the original undistorted image before passing it through the function:
ax1.imshow(undistort_image)
ax1.set_title('Undistorted Original Image:', fontsize=40)
# Saving the thresholded gradient in a variable and folder:
thresholded_image = abs_sobel_thresh(undistort_image, orient='x', sobel_kernel=3, thresh_min=20, thresh_max=100)
plt.imsave('output_images/thresholded_image.jpg', thresholded_image)
print('Image saved in directory!')
# Showing the thresholded gradient after passing it through the function:
ax2.imshow(thresholded_image, cmap='gray')
ax2.set_title('Thresholded Gradient:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# NOTE(review): cv2.imread returns BGR, but the threshold helpers convert
# with COLOR_RGB2GRAY, so R and B are swapped before grayscaling — TODO
# confirm this is acceptable for this exercise.
highway_test1 = cv2.imread('test_images/straight_lines1.jpg')
# Displaying threshold gradient in highway:
# Showing the original image before passing it through the function:
ax1.imshow(highway_test1)
ax1.set_title('Original Image:', fontsize=40)
# Saving the thresholded gradient in a variable and folder:
highway_threshold_test1 = abs_sobel_thresh(highway_test1, orient='x', sobel_kernel=3, thresh_min=20, thresh_max=100)
plt.imsave('output_images/highway_threshold_test1.jpg', highway_threshold_test1)
print('Image saved in directory!')
# Showing the thresholded gradient after passing it through the function:
ax2.imshow(highway_threshold_test1, cmap='gray')
ax2.set_title('Thresholded Gradient:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Creating function that applies Magnitude of Gradient:
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of pixels whose gradient magnitude lies inside a range.

    Args:
        img: RGB image array.
        sobel_kernel: odd kernel size passed to cv2.Sobel.
        mag_thresh: (low, high) — keep pixels with low < magnitude < high
            on the 0-255 scaled magnitude.

    Returns:
        uint8 binary image, 1 where the scaled magnitude passes the range.
    """
    # Converting image to grayscale:
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Fix: pass `sobel_kernel` through — the original ignored the
    # parameter and always used the default 3x3 kernel.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Euclidean magnitude of the gradient:
    magnitude = np.sqrt(sobelx**2 + sobely**2)
    # Scaling the magnitude to 8-bit (0-255):
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    # Mask of 1s inside the (exclusive) threshold range:
    binary_output = np.zeros_like(scaled)
    binary_output[(scaled > mag_thresh[0]) & (scaled < mag_thresh[1])] = 1
    return binary_output
print('Function ready to use!')
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Showing the original undistorted image before passing it through the function:
ax1.imshow(undistort_image)
ax1.set_title('Undistorted Original Image:', fontsize=40)
# Saving the thresholded magnitude in a variable and folder:
magnitude_image = mag_thresh(undistort_image, sobel_kernel=3, mag_thresh=(30, 100))
plt.imsave('output_images/magnitude_image.jpg', magnitude_image)
print('Image saved in directory!')
# Showing the thresholded magnitude after passing it through the function:
ax2.imshow(magnitude_image, cmap='gray')
ax2.set_title('Thresholded Magnitude:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Displaying magnitude of gradient in highway picture:
# Showing the original image before passing it through the function:
ax1.imshow(highway_test1)
ax1.set_title('Original Image:', fontsize=40)
# Saving the magnitude thresholded in a variable and folder:
highway_magnitude_test1 = mag_thresh(highway_test1, sobel_kernel=3, mag_thresh=(30, 100))
plt.imsave('output_images/highway_magnitude_test1.jpg', highway_magnitude_test1)
print('Image saved in directory!')
# Showing the magnitude thresholded after passing it through the function:
ax2.imshow(highway_magnitude_test1, cmap='gray')
ax2.set_title('Thresholded Magnitude:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Creating the function direction of the gradient:
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of pixels whose gradient direction lies inside a range.

    Args:
        img: RGB image array.
        sobel_kernel: odd kernel size passed to cv2.Sobel.
        thresh: (low, high) direction bounds in radians, within [0, pi/2].

    Returns:
        float binary image, 1 where the gradient direction passes the range.
    """
    # Converting image to grayscale:
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Fix: pass `sobel_kernel` through — the original ignored it.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Absolute values of both derivatives:
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    # Fix: the original computed arctan2(abs_sobely, abs_sobely), which is
    # a constant pi/4 everywhere — the direction must be y over x:
    abs_gradient = np.arctan2(abs_sobely, abs_sobelx)
    # Mask of 1s inside the (exclusive) direction range:
    binary_output = np.zeros_like(abs_gradient)
    binary_output[(abs_gradient > thresh[0]) & (abs_gradient < thresh[1])] = 1
    return binary_output
print('Function ready to use!')
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Showing the original undistorted image before passing it through the function:
ax1.imshow(undistort_image)
ax1.set_title('Undistorted Original Image:', fontsize=40)
# Saving the direction of gradient in a variable and folder:
dir_gradient_image = dir_threshold(undistort_image, sobel_kernel=15, thresh=(0.7, 1.3))
plt.imsave('output_images/dir_gradient_image.jpg', dir_gradient_image)
print('Image saved in directory!')
# Showing the direction of gradient after passing it through the function:
ax2.imshow(dir_gradient_image, cmap='gray')
ax2.set_title('Direction of Gradient:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Displaying direction of gradient in highway picture:
# Showing the original image before passing it through the function:
ax1.imshow(highway_test1)
ax1.set_title('Original Image:', fontsize=40)
# Saving the direction of gradient in a variable and folder:
highway_direction_gradient_test1 = dir_threshold(highway_test1, sobel_kernel=15, thresh=(0.7, 1.3))
plt.imsave('output_images/highway_direction_gradient_test1.jpg', highway_direction_gradient_test1)
print('Image saved in directory!')
# Showing the direction of gradient after passing it through the function.
# Fix: display the direction-of-gradient result — the original showed
# highway_magnitude_test1 (copy-paste slip from the previous cell).
ax2.imshow(highway_direction_gradient_test1, cmap='gray')
ax2.set_title('Direction of Gradient:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Combining abs_sobel_thresh, mag_thresh & dir_threshold functions:
# Choosing a Sobel kernel size:
ksize = 7
# Applying each of the threshold functions:
gradx = abs_sobel_thresh(highway_test1, orient='x', sobel_kernel=ksize, thresh_min=20, thresh_max=100)
grady = abs_sobel_thresh(highway_test1, orient='y', sobel_kernel=ksize, thresh_min=20, thresh_max=100)
mag_binary = mag_thresh(highway_test1, sobel_kernel=ksize, mag_thresh=(170, 255))
# NOTE(review): thresh=(0, pi/2) spans the entire output range of the
# direction function, so dir_binary excludes almost nothing here.
dir_binary = dir_threshold(highway_test1, sobel_kernel=ksize, thresh=(0, np.pi/2))
# Combining the masks: (x AND y gradient) OR (magnitude AND direction):
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
print('Functions combined!')
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Displaying combined threshold in highway picture:
# Showing the original image before passing it through the function:
ax1.imshow(highway_test1)
ax1.set_title('Original Image:', fontsize=40)
# Saving the combined thresholded and saving in a variable and folder:
plt.imsave('output_images/highway_combined_test1.jpg', combined)
print('Image saved in directory!')
# Showing the combined thresholded after passing it through the function:
ax2.imshow(combined, cmap='gray')
ax2.set_title('Combined Threshold:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Creating HLS function:
def hls_select(img, thresh=(0, 255)):
    """Binary mask from the S (saturation) channel of the HLS colorspace.

    Args:
        img: RGB image array.
        thresh: (low, high) — keep pixels with low < S <= high.

    Returns:
        uint8 binary image, 1 where the S channel passes the threshold.
    """
    # Converting the image to HLS:
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    # Only the saturation channel is needed (the original also extracted
    # H and L into unused locals — removed):
    s_channel = hls[:, :, 2]
    # Applying the threshold to the S channel:
    binary_output = np.zeros_like(s_channel)
    binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1
    return binary_output
print('Function is ready to use!')
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Displaying S threshold in highway picture:
# Showing the original image before passing it through the function:
ax1.imshow(highway_test1)
ax1.set_title('Original Image:', fontsize=40)
# Saving the S thresholded and saving in a variable and folder:
highway_S_channel_test1 = hls_select(highway_test1, thresh=(50, 255))
plt.imsave('output_images/highway_S_channel_test1.jpg', highway_S_channel_test1)
print('Image saved in directory!')
# Showing the S thresholded after passing it through the function:
ax2.imshow(highway_S_channel_test1, cmap='gray')
ax2.set_title('S Threshold:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Showing the original image before passing it through the function:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(highway_test1)
ax1.set_title('Original Image:', fontsize=30)
# Saving the undistorted highway image in a variable and folder
# (this variable feeds the perspective-transform cells below):
highway_undistort_test1 = undistort_img(highway_test1)
plt.imsave('output_images/highway_undistort_test1.jpg', highway_undistort_test1)
print('Image saved in directory!')
# Showing the undistorted image after passing it through the function:
ax2.imshow(highway_undistort_test1)
ax2.set_title('Undistorted Image:', fontsize=30)
# Showing the original image before passing it through the function:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(highway_undistort_test1)
ax1.set_title('Undistort Original Image:', fontsize=30)
# Saving the undistorted S-channel image in a variable and folder.
# Fix: threshold the UNDISTORTED frame — the original passed the raw
# highway_test1, contradicting the variable name and the plot titles.
highway_S_channel_undistort_test1 = hls_select(highway_undistort_test1, thresh=(50, 255))
plt.imsave('output_images/highway_S_channel_undistort_test1.jpg', highway_S_channel_undistort_test1)
print('Image saved in directory!')
# Showing the undistorted S-channel image after passing it through the function:
ax2.imshow(highway_S_channel_undistort_test1)
ax2.set_title('Undistorted S-Channel Image:', fontsize=30)
# Creating function that combines thresholds:
def combining_threshold(img, sx_thresh=(20, 100), s_thresh=(170, 255)):
    """Combine a Sobel-x gradient threshold with an HLS S-channel threshold.

    Args:
        img: undistorted RGB image.
        sx_thresh: (min, max) inclusive bounds for the 0-255 scaled Sobel-x
            gradient (defaults keep the original hard-coded 20/100).
        s_thresh: (min, max) inclusive bounds for the S channel
            (defaults keep the original hard-coded 170/255).

    Returns:
        (color_binary, combined_binary):
        color_binary — 3-channel visualization (gradient mask in green,
        S-channel mask in blue);
        combined_binary — single-channel union of both masks.
    """
    # The S channel of HLS keeps lane-line color information that plain
    # grayscaling loses:
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    s_channel = hls[:, :, 2]
    # Grayscale for the gradient computation:
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Sobel x — the x derivative accentuates near-vertical lane lines:
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    # Threshold the x gradient:
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold the color channel:
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Stack the two contributions into an RGB visualization
    # (green = gradient, blue = S channel):
    color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary, s_binary)) * 255
    # Union of the two binary masks:
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
    return color_binary, combined_binary
print('Function ready to use!')
# Plotting thresholded images:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
# One call yields both visualizations (the original called the function
# twice and threw half the result away each time):
highway_stacked_threshold_test1, highway_combined_S_and_thresholds_test1 = combining_threshold(highway_undistort_test1)
# Showing the stacked thresholds image:
ax1.set_title('Stacked thresholds:', fontsize=20)
ax1.imshow(highway_stacked_threshold_test1)
# Fix: plt.imsave takes (filename, array) — the original passed a stray
# extra array positionally, which lands in the vmin slot. The stacked
# image is float RGB in [0, 255]; scale to [0, 1] for imsave.
plt.imsave('output_images/highway_stacked_threshold_test1.jpg', highway_stacked_threshold_test1 / 255)
# Showing the combined S-Channel and gradient threshold:
ax2.set_title('Combined S channel and gradient thresholds:', fontsize=20)
ax2.imshow(highway_combined_S_and_thresholds_test1, cmap='gray')
plt.imsave('output_images/highway_combined_S_and_thresholds_test1.jpg', highway_combined_S_and_thresholds_test1, cmap='gray')
print('Image saved in directory!')
# Setting the source points (reference corner values kept for documentation):
# top_left = [585, 460]
# top_right = [203, 720]
# bottom_right = [1127, 720]
# bottom_left = [695, 460]
def _get_img_size(img):
    """Return (width, height) of an image array."""
    return (img.shape[1], img.shape[0])
# Fix: the helper was originally also named `img_size` and was immediately
# shadowed by its own result; renamed so the tuple can keep the `img_size`
# name that later cells read.
img_size = _get_img_size(highway_combined_S_and_thresholds_test1)
# Source points of the perspective transform (trapezoid around the lane):
src = np.float32([
    [(img_size[0] / 2) - 55, img_size[1] / 2 + 100],
    [((img_size[0] / 6) - 10), img_size[1]],
    [(img_size[0] * 5 / 6) + 60, img_size[1]],
    [(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])
# Setting the destination points (reference values kept for documentation):
# top_left_dst = [320, 0]
# top_right_dst = [320, 720]
# bottom_right_dst = [960,720]
# bottom_left_dst = [796, 0]
# Destination points: a rectangle spanning the middle half of the image:
dst = np.float32(
    [[(img_size[0] / 4), 0],
     [(img_size[0] / 4), img_size[1]],
     [(img_size[0] * 3 / 4), img_size[1]],
     [(img_size[0] * 3 / 4), 0]])
print('Source and destination points set up!')
# Creating function that gives a birds eye view of image:
def birds_eye_view(img):
    """Warp an image to a top-down ("bird's eye") view.

    Uses the module-level `src`/`dst` perspective points.

    Args:
        img: image array to warp.

    Returns:
        The warped image, same size as the input.
    """
    # warpPerspective expects the size as (width, height):
    width, height = img.shape[1], img.shape[0]
    # Transform matrix mapping the source trapezoid onto the destination rectangle:
    transform = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, transform, (width, height))
print('Function ready to use!')
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Displaying birds eye view in highway picture:
# Showing the original image before passing it through the function:
ax1.imshow(highway_test1)
ax1.set_title('Original Image:', fontsize=40)
# Saving the birds eye view image in a variable and folder
# (warping the combined binary threshold, not the color frame):
highway_birds_eye_view_test1 = birds_eye_view(highway_combined_S_and_thresholds_test1)
plt.imsave('output_images/highway_birds_eye_view_test1.jpg', highway_birds_eye_view_test1)
print('Image saved in directory!')
# Showing the birds eye view image after passing it through the function:
ax2.imshow(highway_birds_eye_view_test1, cmap='gray')
ax2.set_title('Birds Eye View:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Plotting histogram to show the lanes:
# Column-wise sum of the bottom half of the warped binary image — the two
# peaks mark the bases of the left and right lane lines.
histogram = np.sum(highway_birds_eye_view_test1[highway_birds_eye_view_test1.shape[0]//2:, :], axis=0)
print('Histogram of lanes found from birds eye view:')
plt.plot(histogram)
# Create an output image to draw on and visualize the result:
out_img = np.dstack((highway_birds_eye_view_test1, highway_birds_eye_view_test1, highway_birds_eye_view_test1))*255
# Find the peak of the left and right halves of the histogram:
# These will be the starting point for the left and right lines.
# Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24 — the
# builtin int gives the identical result.
midpoint = int(histogram.shape[0] / 2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choosing the number of sliding windows:
nwindows = 9
# Setting height of windows:
window_height = int(highway_birds_eye_view_test1.shape[0] / nwindows)
# Identifying the X and Y positions of all nonzero pixels in the image:
nonzero = highway_birds_eye_view_test1.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window:
leftx_current = leftx_base
rightx_current = rightx_base
# Setting the width of the windows +/- margin:
margin = 100
# Setting minimum number of pixels found to recenter window:
minpix = 50
# Creating empty lists to receive left and right lane pixel indices:
left_lane_inds = []
right_lane_inds = []
# Stepping through the windows one by one:
for window in range(nwindows):
    # Identifying window boundaries in X and Y (and right and left):
    win_y_low = highway_birds_eye_view_test1.shape[0] - (window + 1) * window_height
    win_y_high = highway_birds_eye_view_test1.shape[0] - window * window_height
    win_xleft_low = leftx_current - margin
    win_xleft_high = leftx_current + margin
    win_xright_low = rightx_current - margin
    win_xright_high = rightx_current + margin
    # Drawing the windows on the visualization image:
    cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
    cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
    # Identifying the nonzero pixels in X and Y within each window:
    good_left_inds = ((nonzeroy >= win_y_low) &
                      (nonzeroy < win_y_high) &
                      (nonzerox >= win_xleft_low) &
                      (nonzerox < win_xleft_high)).nonzero()[0]
    good_right_inds = ((nonzeroy >= win_y_low) &
                       (nonzeroy < win_y_high) &
                       (nonzerox >= win_xright_low) &
                       (nonzerox < win_xright_high)).nonzero()[0]
    # Appending these indices to the lists:
    left_lane_inds.append(good_left_inds)
    right_lane_inds.append(good_right_inds)
    # If found > minpix pixels, recenter next window on their mean position.
    # Fix: np.int was deprecated/removed in modern NumPy — use builtin int.
    if len(good_left_inds) > minpix:
        leftx_current = int(np.mean(nonzerox[good_left_inds]))
    if len(good_right_inds) > minpix:
        rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenating the arrays of indices:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extracting left and right line pixel positions:
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fitting a second order polynomial to each lane line (x as a function of y):
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
print('Finished implemention of sliding windows!')
# Plotting and visualizing the sliding windows:
# Generate x and y values for plotting:
ploty = np.linspace(0, highway_birds_eye_view_test1.shape[0]-1, highway_birds_eye_view_test1.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# Plotting image:
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
plt.imshow(out_img)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
# Defining conversions in X and Y from pixels space to meters:
# Meters per pixel in y dimension:
ym_per_pix = 30/720
# Meters per pixel in x dimension:
xm_per_pix = 3.7/700
# Fitting new polynomials to X, Y in world space:
left_fit_cr = np.polyfit(lefty * ym_per_pix, leftx * xm_per_pix, 2)
right_fit_cr = np.polyfit(righty * ym_per_pix, rightx * xm_per_pix, 2)
# Calculate the new radii of curvature:
left_curverad = ((1 + (2 * left_fit_cr[0] * 720 * ym_per_pix + left_fit_cr[1])**2) **1.5) / np.absolute(2 * left_fit_cr[0])
right_curverad = ((1 + (2 * right_fit_cr[0] * 720 * ym_per_pix + right_fit_cr[1])**2) **1.5) / np.absolute(2 * right_fit_cr[0])
print('Function ready to use!')
# Now our radius of curvature is in meters:
print('Left Curverad:', left_curverad, 'meters')
print('Right Curverad:', right_curverad, 'meters')
# Create an image to draw the lines on:
warp_zero = np.zeros_like(highway_birds_eye_view_test1).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly():
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image:
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv):
Minv = cv2.getPerspectiveTransform(dst, src)
newwarp = cv2.warpPerspective(color_warp, Minv, (highway_test1.shape[1], highway_test1.shape[0]))
# Combine the result with the original image:
result = cv2.addWeighted(highway_undistort_test1, 1, newwarp, 0.3, 0)
print('Shades drawn!')
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Displaying birds eye view in highway picture:
# Showing the original image before passing it through the function:
ax1.imshow(highway_test1)
ax1.set_title('Original Image:', fontsize=40)
# Saving the shaded lane image in a variable and folder:
highway_shaded_test1 = result
plt.imsave('output_images/highway_shaded_test1.jpg', highway_shaded_test1)
print('Image saved in directory!')
# Showing the shaded lane image after passing it through the function:
ax2.imshow(highway_shaded_test1, cmap='gray')
ax2.set_title('Shaded Lane:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
def curvature(img):
    """Annotate an image in place with curvature radius and vehicle offset.

    Uses the module-level ``ploty``, ``left_fit``, ``right_fit``,
    ``xm_per_pix`` and the precomputed curvature radii.

    Args:
        img: image to draw on (mutated in place).

    Returns:
        The same image with the text overlay added.
    """
    # Evaluate both lane fits at the bottom of the image:
    y_max = np.max(ploty)
    left = left_fit[0] * y_max**2 + left_fit[1] * y_max + left_fit[2]
    right = right_fit[0] * y_max**2 + right_fit[1] * y_max + right_fit[2]
    # Lane center in pixels:
    mid = (left + right) / 2
    # Vehicle offset from the image center (640 px — assumes a 1280-wide
    # frame) converted to meters:
    pos = (mid - 640) * xm_per_pix
    cv2.putText(img, 'Radius of curvature: {} m'.format((left_curverad + right_curverad) // 2), (100, 80),
                fontFace = 16, fontScale = 1, color=(255, 255, 255), thickness = 2)
    if pos > 0:
        cv2.putText(img, 'Vehicle is {:.2f}m left of center'.format(pos), (100, 120),
                    fontFace = 16, fontScale = 1, color=(255,255,255), thickness = 2)
    else:
        # Fix: report the magnitude — the original formatted the raw
        # negative value ("Vehicle is -0.12m right of center").
        cv2.putText(img, 'Vehicle is {:.2f}m right of center'.format(-pos), (100, 120),
                    fontFace = 16, fontScale = 1, color=(255,255,255), thickness = 2)
    return img
print('Extra information added')
# Format of showing the original and calibrated images taken from Udacity:
# Formatting the amount of columns and the size of the images that will be displayed:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
# Displaying the annotated shaded lane on the highway picture:
# Showing the original image before passing it through the function:
ax1.imshow(highway_test1)
ax1.set_title('Original Image:', fontsize=40)
# Saving the annotated lane image in a variable and folder.
# NOTE(review): curvature() draws text onto its argument in place, so
# highway_shaded_test1 is mutated here as well.
highway_extra_info_test1 = curvature(highway_shaded_test1)
# NOTE(review): this overwrites the highway_shaded_test1.jpg written by an
# earlier cell — a distinct filename was probably intended.
plt.imsave('output_images/highway_shaded_test1.jpg', highway_extra_info_test1)
print('Image saved in directory!')
# Showing the annotated lane image after passing it through the function:
ax2.imshow(highway_extra_info_test1, cmap='gray')
ax2.set_title('Shaded Lane:', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
# Saving the video file path in a variable:
highway_video = 'project_video.mp4'
clip = VideoFileClip('project_video.mp4')
# Showing the video (inline HTML5 player in the notebook):
HTML("""
<video width="640" height="420" controls>
<source src="{0}">
</video>
""".format(highway_video))
# Undistorting video:
img_size = (1280, 720)
undist_video = 'output_video/undistort_video.mp4'
clip1 = VideoFileClip("project_video.mp4")
write_clip = clip.fl_image(undistort_img)
%time write_clip.write_videofile(undist_video, audio=False)
# Showing the video:
HTML("""
<video width="640" height="420" controls>
<source src="{0}">
</video>
""".format(undist_video))
undist_video_w_info = 'output_video/undistort_w_info_video.mp4'
clip1 = VideoFileClip("project_video.mp4")
write_clip = clip.fl_image(curvature)
%time write_clip.write_videofile(undist_video_w_info, audio=False)
# Showing the video:
HTML("""
<video width="640" height="420" controls>
<source src="{0}">
</video>
""".format(undist_video_w_info))
# Defining conversions in X and Y from pixels space to meters.
# NOTE(review): this cell recomputes exactly the same world-space fits and
# curvature radii as the earlier curvature cell — likely a duplicated
# notebook cell.
# Meters per pixel in y dimension:
ym_per_pix = 30/720
# Meters per pixel in x dimension:
xm_per_pix = 3.7/700
# Fitting new polynomials to X, Y in world space:
left_fit_cr = np.polyfit(lefty * ym_per_pix, leftx * xm_per_pix, 2)
right_fit_cr = np.polyfit(righty * ym_per_pix, rightx * xm_per_pix, 2)
# Calculate the new radii of curvature at the bottom of the image (y = 720 px):
left_curverad = ((1 + (2 * left_fit_cr[0] * 720 * ym_per_pix + left_fit_cr[1])**2) **1.5) / np.absolute(2 * left_fit_cr[0])
right_curverad = ((1 + (2 * right_fit_cr[0] * 720 * ym_per_pix + right_fit_cr[1])**2) **1.5) / np.absolute(2 * right_fit_cr[0])
# Now our radius of curvature is in meters:
print('Left Curverad:', left_curverad, 'meters')
print('Right Curverad:', right_curverad, 'meters')
# Creating a function that finds lanes:
def find_lane(video):
    """Detect the lane in one video frame and return the annotated frame.

    Pipeline: undistort -> perspective-warp to a top-down view -> binary
    threshold -> sliding-window search for the two lane lines -> fit a
    second-order polynomial x = f(y) to each -> shade the lane polygon and
    warp it back onto the undistorted frame.

    Parameters
    ----------
    video : ndarray
        One video frame; expected to match the module-level ``img_size``.

    Returns
    -------
    ndarray
        The undistorted frame with the lane shaded and curvature/offset
        text added by ``curvature``.

    NOTE(review): relies on module-level ``undistort_img``, ``src``,
    ``dst``, ``img_size``, ``combining_threshold`` and ``curvature``
    defined in earlier cells.
    """
    # Undistorting the incoming frame:
    undist = undistort_img(video)
    # Perspective transforms: forward (road -> bird's-eye) and its inverse.
    # BUG FIX: the inverse transform must swap the point sets (dst -> src);
    # the original passed (src, dst) twice, making m_inverse a copy of m.
    m = cv2.getPerspectiveTransform(src, dst)
    m_inverse = cv2.getPerspectiveTransform(dst, src)
    # Warping the frame and thresholding it into a binary image:
    warped = cv2.warpPerspective(undist, m, img_size, flags=cv2.INTER_LINEAR)
    _, binary_warped = combining_threshold(warped)
    # Histogram of the lower half of the image; lane bases show as peaks:
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
    # Output image to draw the sliding windows on (visualization only):
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    # Starting x positions: the histogram peak on each side of the midpoint.
    # (The original computed this block twice; once is enough. `np.int` was
    # removed in NumPy >= 1.24, so the builtin int() is used instead.)
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Sliding-window parameters:
    nwindows = 9
    window_height = int(binary_warped.shape[0] / nwindows)
    margin = 100  # window half-width (+/- margin) in pixels
    minpix = 50   # min pixels found before recentering the next window
    # X and Y positions of all nonzero pixels in the binary image:
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centers, updated as we move up the image:
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Pixel indices collected for each lane line:
    left_lane_inds = []
    right_lane_inds = []
    # Stepping through the windows from the bottom of the frame upwards:
    for window in range(nwindows):
        # Window boundaries in X and Y (and right and left):
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Drawing the windows on the visualization image:
        cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                      (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low),
                      (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Nonzero pixels that fall inside each window:
        good_left_inds = ((nonzeroy >= win_y_low) &
                          (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) &
                          (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) &
                           (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) &
                           (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If > minpix pixels found, recenter the next window on their mean x:
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenating the per-window arrays of indices:
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extracting left and right line pixel positions:
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fitting a second-order polynomial x = f(y) to each line:
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Generating x and y values of the fitted curves for drawing:
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # Blank 3-channel image to draw the lane polygon on:
    warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recasting the x and y points into the format cv2.fillPoly() expects
    # (right side flipped so the polygon's boundary is traced in order):
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Shading the lane area onto the warped blank image:
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    # Warping the overlay back to the original perspective with the inverse
    # matrix computed above (the original recomputed it here as `Minv`):
    newwarp = cv2.warpPerspective(color_warp, m_inverse, img_size)
    # Blending the overlay onto the undistorted frame and annotating it:
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
    result_w_info = curvature(result)
    return result_w_info
print('Function ready to use!')
# Finding lanes in project video:
project_lane_video = 'output_video/project_lane_video.mp4'
clip1 = VideoFileClip("project_video.mp4")
write_clip = clip1.fl_image(find_lane)
%time write_clip.write_videofile(project_lane_video, audio=False)
# Showing the video:
HTML("""
<video width="640" height="420" controls>
<source src="{0}">
</video>
""".format(project_lane_video))
# Finding lanes in project video:
project_harder_video = 'output_video/project_harder_video.mp4'
clip1 = VideoFileClip("harder_challenge_video.mp4")
write_clip = clip1.fl_image(find_lane)
%time write_clip.write_videofile(project_harder_video, audio=False)
# Showing the video:
HTML("""
<video width="640" height="420" controls>
<source src="{0}">
</video>
""".format(project_harder_video))
# Finding lanes in project video:
project_challenge_video = 'output_video/project_challenge_video.mp4'
clip1 = VideoFileClip("challenge_video.mp4")
write_clip = clip1.fl_image(find_lane)
%time write_clip.write_videofile(project_challenge_video, audio=False)
# Showing the video:
HTML("""
<video width="640" height="420" controls>
<source src="{0}">
</video>
""".format(project_challenge_video))